In [8]:
import numpy as np
import pandas as pd
import glob
import math
import os
import torch
import random
from torchvision import datasets, transforms, models
from torch import nn, optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
from PIL import Image
from pathlib import Path
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
cuda
In [9]:
import os
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
In [10]:
# Get the total number of images in each class
file_count = 0
classes = os.listdir("C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/data/train_data")
print('Classes:',len(classes))
for cat in ['train_data', 'test_data']: #'val',
files = glob.glob('C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/data/'+cat+'/*/*')
print(cat,'Images:',len(files), end = ', ')
print('\n')
for direc in classes:
files = glob.glob('C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/data/*/'+direc+'/*')
file_count += len(files)
print(direc, len(files))
print(file_count)
Classes: 5 train_data Images: 25000, test_data Images: 500, airport_terminal 5100 aquarium 5100 bar 5100 beach 5100 music_studio 5100 25500
Load The Data¶
In [11]:
data_dir = "C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/data"
random.seed(41)
#Data Tranforms (Augmentation and Normalization)
train_transforms = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomPerspective(distortion_scale=0.4),
transforms.RandomHorizontalFlip(0.6),
transforms.Resize(size=(224,224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
val_transforms = transforms.Compose([
transforms.RandomRotation(30),
transforms.RandomHorizontalFlip(0.6),
transforms.Resize(size=(224,224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(size=(224,224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
#Getting all the data with PyTorch Datasets
train_data = datasets.ImageFolder(data_dir + '/train_data', transform= train_transforms)
val_data = datasets.ImageFolder(data_dir + '/test_data', transform= val_transforms)
test_data = datasets.ImageFolder(data_dir + '/test_data', transform= test_transforms)
#Loading the data into PyTorch DataLoader
train_loader = torch.utils.data.DataLoader(train_data, batch_size= 128, shuffle = True,num_workers=2)
valid_loader = torch.utils.data.DataLoader(val_data, batch_size= 64, shuffle = True,num_workers=2)
test_loader = torch.utils.data.DataLoader(test_data, batch_size= 64, shuffle = True,num_workers=2)
#Creating a dictionary of all classes
classes = dict(zip(list(range(len(train_data.classes))),train_data.classes))
Check the images with labels¶
In [12]:
def denormalise(image):
try:
image = image.cpu().numpy().transpose(1, 2, 0)
except:
image = image.transpose(1, 2, 0) # PIL images have channel last
mean = [0.485, 0.456, 0.406]
stdd = [0.229, 0.224, 0.225]
image = (image * stdd + mean).clip(0, 1)
return image
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
fig = plt.figure(figsize=(15, 15))
for idx in np.arange(25):
ax = fig.add_subplot(5, 5, idx + 1, xticks=[], yticks=[])
ax.imshow(denormalise(images[idx]), cmap='gray')
ax.set_title(classes[labels[idx].item()])
Load the model¶
In [13]:
# Specify the device type
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
Out[13]:
device(type='cuda')
In [14]:
# Load pretrained GoogleNet
model = models.googlenet(pretrained=True)
# Freeze all parameters except the classifier
for param in model.parameters():
param.requires_grad = False
# Modify the final fully connected layer (fc) of GoogleNet
model.fc = nn.Sequential(
nn.Linear(1024, 512), # 1024 is the input size to the fc layer in GoogleNet
nn.Dropout(0.6),
nn.ReLU(),
nn.Linear(512, 256),
nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(256, 128),
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(128, len(classes)) # Output size should match the number of your classes
)
# Optionally, disable the AuxLogits to avoid auxiliary classifiers
model.aux_logits = False # Disable auxiliary logits (used for training only in some cases)
# Unfreeze the final classifier layer for training
for param in model.fc.parameters():
param.requires_grad = True
# Move the model to GPU
model = model.to(device)
Specify the Loss Criterion, Optimizer and Scheduler¶
In [16]:
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.fc.parameters(),lr= 1e-4)
scheduler = StepLR(optimizer, step_size=2, gamma=0.35)
Train The Model¶
In [17]:
epochs = 3 # Number of epochs
steps = 0
print_every = 5
train_losses, valid_losses,valid_acc = [], [], [] # List keeping track of losses and accuracy to plot later
valid_loss_min = np.inf # It will be used to save model whenever Vallidation loss decreases
valid_acc_min = 0.0
for e in range(epochs):
train_loss = 0
model.train()
#train the model
for images, labels in train_loader:
steps+=1
# Move tensor to device('cuda' in case of GPU or 'cpu' in case of CPU)
images, labels = images.to(device), labels.to(device)
# Clearing all the previous gradients
optimizer.zero_grad()
# Forward Pass
logits = model(images)
# Loss calculation
loss = criterion(logits,labels)
# Backward Pass
loss.backward()
# Update the parameters
optimizer.step()
# Updating the losses list
train_loss += loss.item()
# Evaluating after specific amount of steps
if steps % print_every == 0:
valid_loss = 0
accuracy = 0
# Setting Model to Evaluation Mode
model.eval()
with torch.no_grad():
# Getting Validation loss
for images, labels in valid_loader:
images, labels = images.to(device), labels.to(device)
logits = model(images)
batch_loss = criterion(logits,labels)
valid_loss += batch_loss.item()
# Calculating Accuracy
output = F.softmax(logits,dim=1)
top_p,top_class = output.topk(1,dim = 1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
# Printing stats
print(f"Epoch {e+1}/{epochs}.. "
f"Train loss: {train_loss/print_every:.3f}.. "
f"Validation loss: {valid_loss/len(valid_loader):.3f}.. "
f"Validation accuracy: {accuracy/len(valid_loader):.3f}.. "
f"LR : {scheduler.get_lr():}"
)
valid_loss = valid_loss/len(valid_loader)
train_losses.append(train_loss/print_every)
valid_losses.append(valid_loss)
valid_acc.append(accuracy/len(valid_loader))
# Checking if Validation loss decreased
if valid_loss <= valid_loss_min:
# if decreased, it will save the model
print('valid loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
valid_loss_min,
valid_loss))
torch.save(model.state_dict(), 'C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/scenes_googlenet.pt')
valid_loss_min = valid_loss
# Scheduler performing a step to change learning rate of Optimizer
scheduler.step()
c:\Users\ALEJANDRO\Documents\5. Programming\.virtualenvs\aplt_duke\lib\site-packages\torch\optim\lr_scheduler.py:525: UserWarning: To get the last learning rate computed by the scheduler, please use `get_last_lr()`. _warn_get_lr_called_within_step(self)
Epoch 1/3.. Train loss: 1.612.. Validation loss: 1.607.. Validation accuracy: 0.226.. LR : [0.0001] valid loss decreased (inf --> 1.607348). Saving model ... Epoch 1/3.. Train loss: 3.219.. Validation loss: 1.601.. Validation accuracy: 0.343.. LR : [0.0001] valid loss decreased (1.607348 --> 1.601063). Saving model ... Epoch 1/3.. Train loss: 4.817.. Validation loss: 1.591.. Validation accuracy: 0.412.. LR : [0.0001] valid loss decreased (1.601063 --> 1.590840). Saving model ... Epoch 1/3.. Train loss: 6.404.. Validation loss: 1.577.. Validation accuracy: 0.537.. LR : [0.0001] valid loss decreased (1.590840 --> 1.576865). Saving model ... Epoch 1/3.. Train loss: 7.971.. Validation loss: 1.556.. Validation accuracy: 0.588.. LR : [0.0001] valid loss decreased (1.576865 --> 1.556330). Saving model ... Epoch 1/3.. Train loss: 9.521.. Validation loss: 1.528.. Validation accuracy: 0.651.. LR : [0.0001] valid loss decreased (1.556330 --> 1.528277). Saving model ... Epoch 1/3.. Train loss: 11.037.. Validation loss: 1.492.. Validation accuracy: 0.697.. LR : [0.0001] valid loss decreased (1.528277 --> 1.491523). Saving model ... Epoch 1/3.. Train loss: 12.512.. Validation loss: 1.442.. Validation accuracy: 0.714.. LR : [0.0001] valid loss decreased (1.491523 --> 1.442246). Saving model ... Epoch 1/3.. Train loss: 13.933.. Validation loss: 1.376.. Validation accuracy: 0.756.. LR : [0.0001] valid loss decreased (1.442246 --> 1.375597). Saving model ... Epoch 1/3.. Train loss: 15.280.. Validation loss: 1.299.. Validation accuracy: 0.730.. LR : [0.0001] valid loss decreased (1.375597 --> 1.299307). Saving model ... Epoch 1/3.. Train loss: 16.552.. Validation loss: 1.203.. Validation accuracy: 0.767.. LR : [0.0001] valid loss decreased (1.299307 --> 1.203499). Saving model ... Epoch 1/3.. Train loss: 17.730.. Validation loss: 1.100.. Validation accuracy: 0.801.. LR : [0.0001] valid loss decreased (1.203499 --> 1.100064). Saving model ... Epoch 1/3.. Train loss: 18.783.. Validation loss: 1.002.. Validation accuracy: 0.786.. LR : [0.0001] valid loss decreased (1.100064 --> 1.002217). Saving model ... Epoch 1/3.. Train loss: 19.757.. Validation loss: 0.892.. Validation accuracy: 0.831.. LR : [0.0001] valid loss decreased (1.002217 --> 0.891576). Saving model ... Epoch 1/3.. Train loss: 20.660.. Validation loss: 0.814.. Validation accuracy: 0.808.. LR : [0.0001] valid loss decreased (0.891576 --> 0.814084). Saving model ... Epoch 1/3.. Train loss: 21.457.. Validation loss: 0.738.. Validation accuracy: 0.805.. LR : [0.0001] valid loss decreased (0.814084 --> 0.737672). Saving model ... Epoch 1/3.. Train loss: 22.199.. Validation loss: 0.673.. Validation accuracy: 0.814.. LR : [0.0001] valid loss decreased (0.737672 --> 0.672787). Saving model ... Epoch 1/3.. Train loss: 22.857.. Validation loss: 0.602.. Validation accuracy: 0.824.. LR : [0.0001] valid loss decreased (0.672787 --> 0.602154). Saving model ... Epoch 1/3.. Train loss: 23.476.. Validation loss: 0.574.. Validation accuracy: 0.838.. LR : [0.0001] valid loss decreased (0.602154 --> 0.574147). Saving model ... Epoch 1/3.. Train loss: 24.056.. Validation loss: 0.545.. Validation accuracy: 0.816.. LR : [0.0001] valid loss decreased (0.574147 --> 0.544615). Saving model ... Epoch 1/3.. Train loss: 24.630.. Validation loss: 0.519.. Validation accuracy: 0.826.. LR : [0.0001] valid loss decreased (0.544615 --> 0.518826). Saving model ... Epoch 1/3.. Train loss: 25.162.. Validation loss: 0.469.. Validation accuracy: 0.848.. LR : [0.0001] valid loss decreased (0.518826 --> 0.468864). Saving model ... Epoch 1/3.. Train loss: 25.678.. Validation loss: 0.443.. Validation accuracy: 0.872.. LR : [0.0001] valid loss decreased (0.468864 --> 0.443328). Saving model ... Epoch 1/3.. Train loss: 26.206.. Validation loss: 0.457.. Validation accuracy: 0.851.. LR : [0.0001] Epoch 1/3.. Train loss: 26.659.. Validation loss: 0.421.. Validation accuracy: 0.874.. LR : [0.0001] valid loss decreased (0.443328 --> 0.420933). Saving model ... Epoch 1/3.. Train loss: 27.109.. Validation loss: 0.423.. Validation accuracy: 0.857.. LR : [0.0001] Epoch 1/3.. Train loss: 27.599.. Validation loss: 0.410.. Validation accuracy: 0.855.. LR : [0.0001] valid loss decreased (0.420933 --> 0.409551). Saving model ... Epoch 1/3.. Train loss: 28.027.. Validation loss: 0.411.. Validation accuracy: 0.848.. LR : [0.0001] Epoch 1/3.. Train loss: 28.473.. Validation loss: 0.380.. Validation accuracy: 0.883.. LR : [0.0001] valid loss decreased (0.409551 --> 0.379855). Saving model ... Epoch 1/3.. Train loss: 28.903.. Validation loss: 0.362.. Validation accuracy: 0.879.. LR : [0.0001] valid loss decreased (0.379855 --> 0.362285). Saving model ... Epoch 1/3.. Train loss: 29.318.. Validation loss: 0.370.. Validation accuracy: 0.874.. LR : [0.0001] Epoch 1/3.. Train loss: 29.763.. Validation loss: 0.372.. Validation accuracy: 0.890.. LR : [0.0001] Epoch 1/3.. Train loss: 30.181.. Validation loss: 0.374.. Validation accuracy: 0.869.. LR : [0.0001] Epoch 1/3.. Train loss: 30.573.. Validation loss: 0.360.. Validation accuracy: 0.889.. LR : [0.0001] valid loss decreased (0.362285 --> 0.360163). Saving model ... Epoch 1/3.. Train loss: 30.945.. Validation loss: 0.358.. Validation accuracy: 0.877.. LR : [0.0001] valid loss decreased (0.360163 --> 0.358093). Saving model ... Epoch 1/3.. Train loss: 31.368.. Validation loss: 0.354.. Validation accuracy: 0.866.. LR : [0.0001] valid loss decreased (0.358093 --> 0.353981). Saving model ... Epoch 1/3.. Train loss: 31.767.. Validation loss: 0.337.. Validation accuracy: 0.875.. LR : [0.0001] valid loss decreased (0.353981 --> 0.336767). Saving model ... Epoch 1/3.. Train loss: 32.129.. Validation loss: 0.325.. Validation accuracy: 0.886.. LR : [0.0001] valid loss decreased (0.336767 --> 0.324759). Saving model ... Epoch 1/3.. Train loss: 32.489.. Validation loss: 0.323.. Validation accuracy: 0.881.. LR : [0.0001] valid loss decreased (0.324759 --> 0.323215). Saving model ... Epoch 2/3.. Train loss: 0.823.. Validation loss: 0.413.. Validation accuracy: 0.852.. LR : [0.0001] Epoch 2/3.. Train loss: 1.295.. Validation loss: 0.397.. Validation accuracy: 0.864.. LR : [0.0001] Epoch 2/3.. Train loss: 1.717.. Validation loss: 0.349.. Validation accuracy: 0.862.. LR : [0.0001] Epoch 2/3.. Train loss: 2.150.. Validation loss: 0.339.. Validation accuracy: 0.887.. LR : [0.0001] Epoch 2/3.. Train loss: 2.550.. Validation loss: 0.330.. Validation accuracy: 0.877.. LR : [0.0001] Epoch 2/3.. Train loss: 2.913.. Validation loss: 0.346.. Validation accuracy: 0.881.. LR : [0.0001] Epoch 2/3.. Train loss: 3.315.. Validation loss: 0.361.. Validation accuracy: 0.872.. LR : [0.0001] Epoch 2/3.. Train loss: 3.711.. Validation loss: 0.330.. Validation accuracy: 0.881.. LR : [0.0001] Epoch 2/3.. Train loss: 4.107.. Validation loss: 0.338.. Validation accuracy: 0.880.. LR : [0.0001] Epoch 2/3.. Train loss: 4.523.. Validation loss: 0.343.. Validation accuracy: 0.890.. LR : [0.0001] Epoch 2/3.. Train loss: 4.951.. Validation loss: 0.334.. Validation accuracy: 0.885.. LR : [0.0001] Epoch 2/3.. Train loss: 5.304.. Validation loss: 0.333.. Validation accuracy: 0.886.. LR : [0.0001] Epoch 2/3.. Train loss: 5.687.. Validation loss: 0.327.. Validation accuracy: 0.879.. LR : [0.0001] Epoch 2/3.. Train loss: 6.077.. Validation loss: 0.288.. Validation accuracy: 0.904.. LR : [0.0001] valid loss decreased (0.323215 --> 0.288448). Saving model ... Epoch 2/3.. Train loss: 6.441.. Validation loss: 0.325.. Validation accuracy: 0.887.. LR : [0.0001] Epoch 2/3.. Train loss: 6.802.. Validation loss: 0.352.. Validation accuracy: 0.892.. LR : [0.0001] Epoch 2/3.. Train loss: 7.211.. Validation loss: 0.313.. Validation accuracy: 0.882.. LR : [0.0001] Epoch 2/3.. Train loss: 7.607.. Validation loss: 0.306.. Validation accuracy: 0.884.. LR : [0.0001] Epoch 2/3.. Train loss: 7.939.. Validation loss: 0.309.. Validation accuracy: 0.900.. LR : [0.0001] Epoch 2/3.. Train loss: 8.339.. Validation loss: 0.338.. Validation accuracy: 0.864.. LR : [0.0001] Epoch 2/3.. Train loss: 8.708.. Validation loss: 0.300.. Validation accuracy: 0.890.. LR : [0.0001] Epoch 2/3.. Train loss: 9.098.. Validation loss: 0.299.. Validation accuracy: 0.895.. LR : [0.0001] Epoch 2/3.. Train loss: 9.464.. Validation loss: 0.324.. Validation accuracy: 0.873.. LR : [0.0001] Epoch 2/3.. Train loss: 9.791.. Validation loss: 0.314.. Validation accuracy: 0.881.. LR : [0.0001] Epoch 2/3.. Train loss: 10.146.. Validation loss: 0.329.. Validation accuracy: 0.881.. LR : [0.0001] Epoch 2/3.. Train loss: 10.529.. Validation loss: 0.308.. Validation accuracy: 0.903.. LR : [0.0001] Epoch 2/3.. Train loss: 10.867.. Validation loss: 0.304.. Validation accuracy: 0.896.. LR : [0.0001] Epoch 2/3.. Train loss: 11.254.. Validation loss: 0.335.. Validation accuracy: 0.876.. LR : [0.0001] Epoch 2/3.. Train loss: 11.584.. Validation loss: 0.293.. Validation accuracy: 0.902.. LR : [0.0001] Epoch 2/3.. Train loss: 11.970.. Validation loss: 0.310.. Validation accuracy: 0.873.. LR : [0.0001] Epoch 2/3.. Train loss: 12.319.. Validation loss: 0.308.. Validation accuracy: 0.886.. LR : [0.0001] Epoch 2/3.. Train loss: 12.645.. Validation loss: 0.312.. Validation accuracy: 0.877.. LR : [0.0001] Epoch 2/3.. Train loss: 12.976.. Validation loss: 0.309.. Validation accuracy: 0.888.. LR : [0.0001] Epoch 2/3.. Train loss: 13.385.. Validation loss: 0.279.. Validation accuracy: 0.905.. LR : [0.0001] valid loss decreased (0.288448 --> 0.279378). Saving model ... Epoch 2/3.. Train loss: 13.760.. Validation loss: 0.290.. Validation accuracy: 0.886.. LR : [0.0001] Epoch 2/3.. Train loss: 14.149.. Validation loss: 0.312.. Validation accuracy: 0.886.. LR : [0.0001] Epoch 2/3.. Train loss: 14.442.. Validation loss: 0.305.. Validation accuracy: 0.898.. LR : [0.0001] Epoch 2/3.. Train loss: 14.782.. Validation loss: 0.277.. Validation accuracy: 0.901.. LR : [0.0001] valid loss decreased (0.279378 --> 0.276780). Saving model ... Epoch 2/3.. Train loss: 15.100.. Validation loss: 0.285.. Validation accuracy: 0.898.. LR : [0.0001] Epoch 3/3.. Train loss: 0.567.. Validation loss: 0.313.. Validation accuracy: 0.886.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 0.954.. Validation loss: 0.321.. Validation accuracy: 0.874.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 1.284.. Validation loss: 0.357.. Validation accuracy: 0.855.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 1.635.. Validation loss: 0.302.. Validation accuracy: 0.890.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 1.990.. Validation loss: 0.301.. Validation accuracy: 0.901.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 2.337.. Validation loss: 0.303.. Validation accuracy: 0.892.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 2.707.. Validation loss: 0.320.. Validation accuracy: 0.888.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 3.103.. Validation loss: 0.316.. Validation accuracy: 0.885.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 3.463.. Validation loss: 0.335.. Validation accuracy: 0.882.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 3.850.. Validation loss: 0.316.. Validation accuracy: 0.896.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 4.210.. Validation loss: 0.307.. Validation accuracy: 0.898.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 4.524.. Validation loss: 0.302.. Validation accuracy: 0.890.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 4.835.. Validation loss: 0.295.. Validation accuracy: 0.877.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 5.190.. Validation loss: 0.300.. Validation accuracy: 0.896.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 5.507.. Validation loss: 0.299.. Validation accuracy: 0.909.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 5.880.. Validation loss: 0.310.. Validation accuracy: 0.888.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 6.276.. Validation loss: 0.302.. Validation accuracy: 0.902.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 6.638.. Validation loss: 0.298.. Validation accuracy: 0.884.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 7.017.. Validation loss: 0.293.. Validation accuracy: 0.890.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 7.331.. Validation loss: 0.344.. Validation accuracy: 0.877.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 7.697.. Validation loss: 0.299.. Validation accuracy: 0.891.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 8.053.. Validation loss: 0.313.. Validation accuracy: 0.884.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 8.422.. Validation loss: 0.310.. Validation accuracy: 0.894.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 8.765.. Validation loss: 0.301.. Validation accuracy: 0.893.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 9.056.. Validation loss: 0.304.. Validation accuracy: 0.885.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 9.427.. Validation loss: 0.308.. Validation accuracy: 0.889.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 9.824.. Validation loss: 0.289.. Validation accuracy: 0.902.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 10.110.. Validation loss: 0.297.. Validation accuracy: 0.902.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 10.486.. Validation loss: 0.298.. Validation accuracy: 0.893.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 10.789.. Validation loss: 0.290.. Validation accuracy: 0.897.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 11.145.. Validation loss: 0.287.. Validation accuracy: 0.896.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 11.515.. Validation loss: 0.283.. Validation accuracy: 0.897.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 11.862.. Validation loss: 0.310.. Validation accuracy: 0.896.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 12.262.. Validation loss: 0.293.. Validation accuracy: 0.894.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 12.591.. Validation loss: 0.280.. Validation accuracy: 0.909.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 12.944.. Validation loss: 0.301.. Validation accuracy: 0.889.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 13.320.. Validation loss: 0.301.. Validation accuracy: 0.890.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 13.669.. Validation loss: 0.284.. Validation accuracy: 0.904.. LR : [1.2249999999999998e-05] Epoch 3/3.. Train loss: 14.031.. Validation loss: 0.329.. Validation accuracy: 0.886.. LR : [1.2249999999999998e-05]
Analysis of Training and Validation Losses and Accuracy¶
In [19]:
model.load_state_dict(torch.load("C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/scenes_googlenet.pt"))
C:\Users\ALEJANDRO\AppData\Local\Temp\ipykernel_23384\808457093.py:1: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
model.load_state_dict(torch.load("C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/scenes_googlenet.pt"))
Out[19]:
<All keys matched successfully>
In [20]:
import pickle
# Save the lists at the end of training
with open('training_metrics_googlenet.pkl', 'wb') as f:
pickle.dump({
'train_losses': train_losses,
'valid_losses': valid_losses,
'valid_acc': valid_acc
}, f)
with open('training_metrics.pkl', 'rb') as f:
data = pickle.load(f)
train_losses = data['train_losses']
valid_losses = data['valid_losses']
valid_acc = data['valid_acc']
In [21]:
plt.plot(train_losses, label='Training loss')
plt.plot(valid_losses, label='Validation loss')
plt.legend(frameon=False)
Out[21]:
<matplotlib.legend.Legend at 0x282ca8f1250>
In [22]:
plt.plot(valid_acc, label='Validation Accuracy')
plt.legend(frameon=False)
Out[22]:
<matplotlib.legend.Legend at 0x282c4d1f5b0>
Model Evaluation¶
In [23]:
model.eval() # Prep model for Evaluation
mean_of = 5 # Mean of how many evaluations
valid_loss = 0.0
class_correct = list(0. for i in range(len(classes))) # List of number of correct predictions in each class
class_total = list(0. for i in range(len(classes))) # List of total number of samples in each class
for i in range(mean_of):
for data, target in test_loader:
# Move the data to device
data, target = data.to(device), target.to(device)
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
valid_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
if len(target) == 1:
class_correct[label] += correct.item()
else:
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print average test loss
valid_loss = valid_loss/(mean_of * len(test_loader.dataset))
print('Test Loss: {:.6f}\n'.format(valid_loss))
# print accuracy of each class
for i in range(len(classes)):
if class_total[i] > 0:
print('Test Accuracy of %5s: %0.2f%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
acc = 100. * np.sum(class_correct) / np.sum(class_total)
# print total accuracy of the model
print('\nTest Accuracy (Overall): %0.2f%% (%2d/%2d)' % (
acc,
np.sum(class_correct), np.sum(class_total)))
Test Loss: 0.251647 Test Accuracy of 0: 97.00% (485/500) Test Accuracy of 1: 90.00% (450/500) Test Accuracy of 2: 87.00% (435/500) Test Accuracy of 3: 99.00% (495/500) Test Accuracy of 4: 83.00% (415/500) Test Accuracy (Overall): 91.20% (2280/2500)
In [24]:
## To check on a custom Image
img = Image.open("C:/Users/ALEJANDRO/Documents/7. DUKE/1. AIPI 590 - Computer Vision/assignment/First_group_project/data/test_data/aquarium/Places365_val_00001341.jpg")
# Convert 2D image to 1D vector
images = test_transforms(img)
plt.imshow(denormalise(images))
# Calculate the class probabilities (softmax) for img
with torch.no_grad():
images = images.to(device)
output = model(images.unsqueeze(0))
ps = F.softmax(output, dim =1 )
classes[int(torch.argmax(ps).item())]
Out[24]:
'aquarium'
In [25]:
# Check which images, the model predicted incorrectly with correct and predicted label and the probability
classes
dataiter = iter(test_loader)
images, labels = next(dataiter)
with torch.no_grad():
images, labels = images.to(device), labels.to(device)
output = model(images)
ps = F.softmax(output, dim =1 )
#images = images.cpu().numpy()
fig = plt.figure(figsize=(25,16))
i = 1
for idx in np.arange(64):
pred = torch.argmax(ps[idx]).item()
prob = ps[idx][pred]
if labels[idx].item() != pred:
ax = fig.add_subplot(8, 8, i, xticks=[], yticks=[])
ax.imshow(denormalise(images[idx]), cmap='gray')
ax.set_title((str(labels[idx].item())+':'+str(pred))+', '+str(prob.item()))
i+=1
In [26]:
import numpy as np
import torch
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
import torch.nn.functional as F
# Assuming 'test_loader' is your DataLoader and 'classes' contains the class names
all_preds = []
all_labels = []
# Iterate through the test dataset to gather predictions and labels
dataiter = iter(test_loader)
for images, labels in dataiter:
with torch.no_grad():
images, labels = images.to(device), labels.to(device)
output = model(images)
ps = F.softmax(output, dim=1)
# Get predictions
preds = torch.argmax(ps, dim=1)
# Store predictions and true labels
all_preds.extend(preds.cpu().numpy())
all_labels.extend(labels.cpu().numpy())
# Create the confusion matrix
cm = confusion_matrix(all_labels, all_preds)
# Plotting the confusion matrix
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
xticklabels=[classes[i] for i in range(len(classes))],
yticklabels=[classes[i] for i in range(len(classes))])
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.title('Confusion Matrix')
plt.show()